Move event/failsafe callback addresses into thread_info (arch-dep).
Make the trap_bounce per-domain rather than per-cpu.
memcpy(c->debugreg,
d->thread.debugreg,
sizeof(d->thread.debugreg));
- c->event_callback_cs =
- d->event_selector;
- c->event_callback_eip =
- d->event_address;
- c->failsafe_callback_cs =
- d->failsafe_selector;
- c->failsafe_callback_eip =
- d->failsafe_address;
+ c->event_callback_cs = d->thread.event_selector;
+ c->event_callback_eip = d->thread.event_address;
+ c->failsafe_callback_cs = d->thread.failsafe_selector;
+ c->failsafe_callback_eip = d->thread.failsafe_address;
}
for ( i = 0; i < 8; i++ )
(void)set_debugreg(d, i, c->debugreg[i]);
- d->event_selector = c->event_callback_cs;
- d->event_address = c->event_callback_eip;
- d->failsafe_selector = c->failsafe_callback_cs;
- d->failsafe_address = c->failsafe_callback_eip;
+ d->thread.event_selector = c->event_callback_cs;
+ d->thread.event_address = c->event_callback_eip;
+ d->thread.failsafe_selector = c->failsafe_callback_cs;
+ d->thread.failsafe_address = c->failsafe_callback_eip;
phys_basetab = c->pt_base;
d->mm.pagetable = mk_pagetable(phys_basetab);
* We're basically forcing default RPLs to 1, so that our "what privilege
* level are we returning to?" logic works.
*/
- p->failsafe_selector = FLAT_GUESTOS_CS;
- p->event_selector = FLAT_GUESTOS_CS;
+ p->thread.failsafe_selector = FLAT_GUESTOS_CS;
+ p->thread.event_selector = FLAT_GUESTOS_CS;
p->thread.guestos_ss = FLAT_GUESTOS_DS;
for ( i = 0; i < 256; i++ )
p->thread.traps[i].cs = FLAT_GUESTOS_CS;
void pdb_handle_debug_trap(struct xen_regs *regs, long error_code)
{
unsigned int condition;
- struct domain *tsk = current;
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
+ struct domain *d = current;
+ struct trap_bounce *tb = &d->thread.trap_bounce;
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
if ( (condition & (1 << 14)) != (1 << 14) )
if ( pdb_handle_exception(1, regs) != 0 )
{
- tsk->thread.debugreg[6] = condition;
+ d->thread.debugreg[6] = condition;
- gtb->flags = GTBF_TRAP_NOCODE;
- gtb->cs = tsk->thread.traps[1].cs;
- gtb->eip = tsk->thread.traps[1].address;
+ tb->flags = TBF_TRAP_NOCODE;
+ tb->cs = d->thread.traps[1].cs;
+ tb->eip = d->thread.traps[1].address;
}
}
#include <asm/i387.h>
#include <asm/debugger.h>
-struct guest_trap_bounce guest_trap_bounce[NR_CPUS] = { { 0 } };
-
#if defined(__i386__)
#define DOUBLEFAULT_STACK_SIZE 1024
struct xen_regs *regs,
long error_code, int use_error_code)
{
- struct domain *p = current;
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
+ struct domain *d = current;
+ struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
goto xen_fault;
ti = current->thread.traps + trapnr;
- gtb->flags = use_error_code ? GTBF_TRAP : GTBF_TRAP_NOCODE;
- gtb->error_code = error_code;
- gtb->cs = ti->cs;
- gtb->eip = ti->address;
+ tb->flags = use_error_code ? TBF_TRAP : TBF_TRAP_NOCODE;
+ tb->error_code = error_code;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return;
xen_fault:
asmlinkage void do_int3(struct xen_regs *regs, long error_code)
{
- struct domain *p = current;
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
+ struct domain *d = current;
+ struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
DEBUGGER_trap_entry(TRAP_int3, regs, error_code);
}
ti = current->thread.traps + 3;
- gtb->flags = GTBF_TRAP_NOCODE;
- gtb->error_code = error_code;
- gtb->cs = ti->cs;
- gtb->eip = ti->address;
+ tb->flags = TBF_TRAP_NOCODE;
+ tb->error_code = error_code;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
}
asmlinkage void do_double_fault(void)
asmlinkage void do_page_fault(struct xen_regs *regs, long error_code)
{
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long off, addr, fixup;
struct domain *d = current;
extern int map_ldt_shadow_page(unsigned int);
+ struct trap_bounce *tb = &d->thread.trap_bounce;
int cpu = d->processor;
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
goto xen_fault;
ti = d->thread.traps + 14;
- gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
- gtb->cr2 = addr;
- gtb->error_code = error_code;
- gtb->cs = ti->cs;
- gtb->eip = ti->address;
+ tb->flags = TBF_TRAP_CR2; /* page fault pushes %cr2 */
+ tb->cr2 = addr;
+ tb->error_code = error_code;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return;
asmlinkage void do_general_protection(struct xen_regs *regs, long error_code)
{
struct domain *d = current;
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
+ struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
ti = current->thread.traps + (error_code>>3);
if ( TI_GET_DPL(ti) >= (regs->cs & 3) )
{
- gtb->flags = GTBF_TRAP_NOCODE;
+ tb->flags = TBF_TRAP_NOCODE;
regs->eip += 2;
goto finish_propagation;
}
/* Pass on GPF as is. */
ti = current->thread.traps + 13;
- gtb->flags = GTBF_TRAP;
- gtb->error_code = error_code;
+ tb->flags = TBF_TRAP;
+ tb->error_code = error_code;
finish_propagation:
- gtb->cs = ti->cs;
- gtb->eip = ti->address;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return;
if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) )
{
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
- gtb->flags = GTBF_TRAP_NOCODE;
- gtb->cs = current->thread.traps[7].cs;
- gtb->eip = current->thread.traps[7].address;
+ struct trap_bounce *tb = &current->thread.trap_bounce;
+ tb->flags = TBF_TRAP_NOCODE;
+ tb->cs = current->thread.traps[7].cs;
+ tb->eip = current->thread.traps[7].address;
}
}
asmlinkage void do_debug(struct xen_regs *regs, long error_code)
{
unsigned int condition;
- struct domain *tsk = current;
- struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
+ struct domain *d = current;
+ struct trap_bounce *tb = &d->thread.trap_bounce;
DEBUGGER_trap_entry(TRAP_debug, regs, error_code);
/* Mask out spurious debug traps due to lazy DR7 setting */
if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
- (tsk->thread.debugreg[7] == 0) )
+ (d->thread.debugreg[7] == 0) )
{
__asm__("movl %0,%%db7" : : "r" (0));
return;
}
/* Save debug status register where guest OS can peek at it */
- tsk->thread.debugreg[6] = condition;
+ d->thread.debugreg[6] = condition;
- gtb->flags = GTBF_TRAP_NOCODE;
- gtb->cs = tsk->thread.traps[1].cs;
- gtb->eip = tsk->thread.traps[1].address;
+ tb->flags = TBF_TRAP_NOCODE;
+ tb->cs = d->thread.traps[1].cs;
+ tb->eip = d->thread.traps[1].address;
}
unsigned long failsafe_selector,
unsigned long failsafe_address)
{
- struct domain *p = current;
+ struct domain *d = current;
if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
return -EPERM;
- p->event_selector = event_selector;
- p->event_address = event_address;
- p->failsafe_selector = failsafe_selector;
- p->failsafe_address = failsafe_address;
+ d->thread.event_selector = event_selector;
+ d->thread.event_address = event_address;
+ d->thread.failsafe_selector = failsafe_selector;
+ d->thread.failsafe_address = failsafe_address;
return 0;
}
OFFSET(DOMAIN_processor, struct domain, processor);
OFFSET(DOMAIN_shared_info, struct domain, shared_info);
- OFFSET(DOMAIN_event_sel, struct domain, event_selector);
- OFFSET(DOMAIN_event_addr, struct domain, event_address);
- OFFSET(DOMAIN_failsafe_sel, struct domain, failsafe_selector);
- OFFSET(DOMAIN_failsafe_addr, struct domain, failsafe_address);
+ OFFSET(DOMAIN_event_sel, struct domain, thread.event_selector);
+ OFFSET(DOMAIN_event_addr, struct domain, thread.event_address);
+ OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector);
+ OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address);
+ OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce);
BLANK();
OFFSET(SHINFO_upcall_pending, shared_info_t,
vcpu_data[0].evtchn_upcall_mask);
BLANK();
- OFFSET(GTB_error_code, struct guest_trap_bounce, error_code);
- OFFSET(GTB_cr2, struct guest_trap_bounce, cr2);
- OFFSET(GTB_flags, struct guest_trap_bounce, flags);
- OFFSET(GTB_cs, struct guest_trap_bounce, cs);
- OFFSET(GTB_eip, struct guest_trap_bounce, eip);
+ OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
+ OFFSET(TRAPBOUNCE_cr2, struct trap_bounce, cr2);
+ OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
+ OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
+ OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
BLANK();
}
/* No special register assumptions */
failsafe_callback:
GET_CURRENT(%ebx)
- movl DOMAIN_processor(%ebx),%eax
- shl $4,%eax
- lea guest_trap_bounce(%eax),%edx
+ leal DOMAIN_trap_bounce(%ebx),%edx
movl DOMAIN_failsafe_addr(%ebx),%eax
- movl %eax,GTB_eip(%edx)
+ movl %eax,TRAPBOUNCE_eip(%edx)
movl DOMAIN_failsafe_sel(%ebx),%eax
- movw %ax,GTB_cs(%edx)
+ movw %ax,TRAPBOUNCE_cs(%edx)
call create_bounce_frame
subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
movl XREGS_ds(%esp),%eax
jz restore_all_guest
movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
- movl DOMAIN_processor(%ebx),%edx
- shl $4,%edx # sizeof(guest_trap_bounce) == 16
- lea guest_trap_bounce(%edx),%edx
+ leal DOMAIN_trap_bounce(%ebx),%edx
movl DOMAIN_event_addr(%ebx),%eax
- movl %eax,GTB_eip(%edx)
+ movl %eax,TRAPBOUNCE_eip(%edx)
movl DOMAIN_event_sel(%ebx),%eax
- movw %ax,GTB_cs(%edx)
+ movw %ax,TRAPBOUNCE_cs(%edx)
call create_bounce_frame
jmp restore_all_guest
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {EIP, CS, EFLAGS, [ESP, SS]} */
-/* %edx == guest_trap_bounce, %ebx == task_struct */
+/* %edx == trap_bounce, %ebx == task_struct */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
create_bounce_frame:
mov XREGS_cs+4(%esp),%cl
movl %eax,XREGS_eflags+4(%esp)
movl %gs,XREGS_ss+4(%esp)
movl %esi,XREGS_esp+4(%esp)
- movzwl GTB_cs(%edx),%eax
+ movzwl TRAPBOUNCE_cs(%edx),%eax
movl %eax,XREGS_cs+4(%esp)
- movl GTB_eip(%edx),%eax
+ movl TRAPBOUNCE_eip(%edx),%eax
movl %eax,XREGS_eip+4(%esp)
ret
ALIGN
process_guest_exception_and_events:
- movl DOMAIN_processor(%ebx),%eax
- shl $4,%eax
- lea guest_trap_bounce(%eax),%edx
- testb $~0,GTB_flags(%edx)
+ leal DOMAIN_trap_bounce(%ebx),%edx
+ testb $~0,TRAPBOUNCE_flags(%edx)
jz test_all_events
call create_bounce_frame # just the basic frame
- mov GTB_flags(%edx),%cl
- test $GTBF_TRAP_NOCODE,%cl
+ mov TRAPBOUNCE_flags(%edx),%cl
+ test $TBF_TRAP_NOCODE,%cl
jnz 2f
subl $4,%esi # push error_code onto guest frame
- movl GTB_error_code(%edx),%eax
+ movl TRAPBOUNCE_error_code(%edx),%eax
FAULT13:movl %eax,%gs:(%esi)
- test $GTBF_TRAP_CR2,%cl
+ test $TBF_TRAP_CR2,%cl
jz 1f
subl $4,%esi # push %cr2 onto guest frame
- movl GTB_cr2(%edx),%eax
+ movl TRAPBOUNCE_cr2(%edx),%eax
FAULT14:movl %eax,%gs:(%esi)
1: movl %esi,XREGS_esp(%esp)
-2: movb $0,GTB_flags(%edx)
+2: movb $0,TRAPBOUNCE_flags(%edx)
jmp test_all_events
ALIGN
{
struct domain *d = current;
trap_info_t *ti;
- struct guest_trap_bounce *gtb;
+ struct trap_bounce *tb;
u8 modrm, mod, reg, rm, decode;
void *memreg, *regreg;
unsigned long offset;
if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments_notify) )
{
ti = &d->thread.traps[15];
- gtb = &guest_trap_bounce[d->processor];
- gtb->flags = GTBF_TRAP;
- gtb->error_code = pb - eip;
- gtb->cs = ti->cs;
- gtb->eip = ti->address;
+ tb = &d->thread.trap_bounce;
+ tb->flags = TBF_TRAP;
+ tb->error_code = pb - eip;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
}
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
/*
- * 'guest_trap_bounce' flags values.
+ * 'trap_bounce' flags values.
*/
-#define GTBF_TRAP 1
-#define GTBF_TRAP_NOCODE 2
-#define GTBF_TRAP_CR2 4
+#define TBF_TRAP 1
+#define TBF_TRAP_NOCODE 2
+#define TBF_TRAP_CR2 4
#ifndef __ASSEMBLY__
u32 __cacheline_filler[5];
};
+struct trap_bounce {
+ unsigned long error_code;
+ unsigned long cr2;
+ unsigned short flags; /* TBF_ */
+ unsigned short cs;
+ unsigned long eip;
+};
+
struct thread_struct {
unsigned long guestos_sp;
unsigned long guestos_ss;
-/* Hardware debugging registers */
+
+ /* Hardware debugging registers */
unsigned long debugreg[8]; /* %%db0-7 debug registers */
-/* floating point info */
+
+ /* floating point info */
struct i387_state i387;
-/* general user-visible register state */
+
+ /* general user-visible register state */
execution_context_t user_ctxt;
-/* Trap info. */
+
+ /*
+ * Return vectors pushed to us by guest OS.
+ * The stack frame for events is exactly that of an x86 hardware interrupt.
+ * The stack frame for a failsafe callback is augmented with saved values
+ * for segment registers %ds, %es, %fs and %gs:
+ * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
+ */
+ unsigned long event_selector; /* entry CS */
+ unsigned long event_address; /* entry EIP */
+
+ unsigned long failsafe_selector; /* entry CS */
+ unsigned long failsafe_address; /* entry EIP */
+
+ /* Bounce information for propagating an exception to guest OS. */
+ struct trap_bounce trap_bounce;
+
+ /* Trap info. */
#ifdef __i386__
int fast_trap_idx;
struct desc_struct fast_trap_desc;
long set_fast_trap(struct domain *p, int idx);
-#define INIT_THREAD { \
- 0, 0, \
- { [0 ... 7] = 0 }, /* debugging registers */ \
- { { 0, }, }, /* 387 state */ \
- { 0 }, \
- 0x20, { 0, 0 }, /* DEFAULT_FAST_TRAP */ \
- { {0} } /* io permissions */ \
-}
+#define INIT_THREAD { fast_trap_idx: 0x20 }
#elif defined(__x86_64__)
#endif /* __x86_64__ */
-struct guest_trap_bounce {
- unsigned long error_code; /* 0 */
- unsigned long cr2; /* 4 */
- unsigned short flags; /* 8 */
- unsigned short cs; /* 10 */
- unsigned long eip; /* 12 */
-};
-extern struct guest_trap_bounce guest_trap_bounce[];
-
extern int gpf_emulate_4gb(struct xen_regs *regs);
struct mm_struct {
struct domain
{
- /*
- * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
- * Their offsets are hardcoded in entry.S
- */
-
- u32 processor; /* 00: current processor */
-
- /* An unsafe pointer into a shared data area. */
- shared_info_t *shared_info; /* 04: shared data area */
-
- /*
- * Return vectors pushed to us by guest OS.
- * The stack frame for events is exactly that of an x86 hardware interrupt.
- * The stack frame for a failsafe callback is augmented with saved values
- * for segment registers %ds, %es, %fs and %gs:
- * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
- */
- unsigned long event_selector; /* 08: entry CS */
- unsigned long event_address; /* 12: entry EIP */
-
- unsigned long failsafe_selector; /* 16: entry CS */
- unsigned long failsafe_address; /* 20: entry EIP */
+ u32 processor;
- /*
- * From here on things can be added and shuffled without special attention
- */
+ shared_info_t *shared_info;
domid_t id;
s_time_t create_time;